}
}
+/*
+ * Common (vendor-independent) check for whether delivery of @intack to @v
+ * is currently blocked.  Replaces the per-vendor TPR checks so the LAPIC
+ * task-priority test lives in one place.
+ */
+enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
+{
+ enum hvm_intblk r;
+ ASSERT(v == current);
+
+ /*
+ * Vendor-specific blockers (EFLAGS.IF, interrupt shadow, NMI blocking,
+ * ...) take precedence: if any applies, report it immediately.
+ */
+ r = hvm_funcs.interrupt_blocked(v, intack);
+ if ( r != hvm_intblk_none )
+ return r;
+
+ /*
+ * The TPR only gates fixed interrupts from the local APIC.  Priority
+ * class is bits 7:4 of the vector; delivery is blocked unless the
+ * vector's class strictly exceeds the TPR's class.
+ */
+ if ( intack.source == hvm_intsrc_lapic )
+ {
+ uint32_t tpr = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xF0;
+ if ( (tpr >> 4) >= (intack.vector >> 4) )
+ return hvm_intblk_tpr;
+ }
+
+ return r;
+}
+
static long hvm_grant_table_op(
unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
{
if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
- if ( (intack.source == hvm_intsrc_lapic) &&
- ((vmcb->vintr.fields.tpr & 0xf) >= (intack.vector >> 4)) )
- return hvm_intblk_tpr;
-
return hvm_intblk_none;
}
svm_asid_inc_generation();
}
-static void svm_update_vtpr(struct vcpu *v, unsigned long value)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- vmcb->vintr.fields.tpr = value & 0x0f;
-}
-
static void svm_sync_vmcb(struct vcpu *v)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
svm_asid_init_vcpu(v);
}
+ /* Reflect the vlapic's TPR in the hardware vtpr */
+ v->arch.hvm_svm.vmcb->vintr.fields.tpr =
+ (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+
hvm_do_resume(v);
reset_stack_and_jump(svm_asm_do_resume);
}
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
.flush_guest_tlbs = svm_flush_guest_tlbs,
- .update_vtpr = svm_update_vtpr,
.stts = svm_stts,
.set_tsc_offset = svm_set_tsc_offset,
.inject_exception = svm_inject_exception,
domain_crash(v->domain);
break;
}
+
+ /* The exit may have updated the TPR: reflect this in the hardware vtpr */
+ vmcb->vintr.fields.tpr =
+ (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
}
asmlinkage void svm_trace_vmentry(void)
{
case APIC_TASKPRI:
vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
- hvm_update_vtpr(v, (val >> 4) & 0x0f);
break;
case APIC_EOI:
if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
- if ( intack.source == hvm_intsrc_lapic )
- {
- uint32_t tpr = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xF0;
- if ( (tpr >> 4) >= (intack.vector >> 4) )
- return hvm_intblk_tpr;
- }
-
return hvm_intblk_none;
}
}
}
-static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
-{
- /* VMX doesn't have a V_TPR field */
-}
-
static int vmx_event_pending(struct vcpu *v)
{
ASSERT(v == current);
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
.flush_guest_tlbs = vmx_flush_guest_tlbs,
- .update_vtpr = vmx_update_vtpr,
.stts = vmx_stts,
.set_tsc_offset = vmx_set_tsc_offset,
.inject_exception = vmx_inject_exception,
*/
void (*flush_guest_tlbs)(void);
- /*
- * Reflect the virtual APIC's value in the guest's V_TPR register
- */
- void (*update_vtpr)(struct vcpu *v, unsigned long value);
-
/*
* Update specifics of the guest state:
* 1) TS bit in guest cr0
#define hvm_long_mode_enabled(v) (v,0)
#endif
-static inline enum hvm_intblk
-hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
-{
- ASSERT(v == current);
- return hvm_funcs.interrupt_blocked(v, intack);
-}
+enum hvm_intblk
+hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
static inline int
hvm_guest_x86_mode(struct vcpu *v)
hvm_funcs.update_host_cr3(v);
}
-static inline void
-hvm_update_vtpr(struct vcpu *v, unsigned long value)
-{
- hvm_funcs.update_vtpr(v, value);
-}
-
static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
hvm_funcs.update_guest_cr(v, cr);